from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By
from bs4 import BeautifulSoup
import time
# --- Demo 1: type "pytest" into python.org's search box and submit it ---
chrome_driver = './chromedriver.exe'
# NOTE(review): the positional executable_path argument is deprecated and was
# removed in Selenium 4; there it must be
# webdriver.Chrome(service=Service(chrome_driver)). Kept as-is for Selenium 3.
driver = webdriver.Chrome(chrome_driver)
driver.get('https://www.python.org/')
# Wait for the search field explicitly instead of sleeping a fixed 2 seconds.
# find_element(By.ID, ...) works on Selenium 3 and 4; find_element_by_id was
# removed in Selenium 4.
search = WebDriverWait(driver, 10).until(
    EC.presence_of_element_located((By.ID, 'id-search-field'))
)
search.clear()
search.send_keys('pytest')
search.send_keys(Keys.RETURN)
# Browser is intentionally left open so the result page can be inspected;
# uncomment to close automatically (quit() also ends the driver process).
# time.sleep(3)
# driver.quit()
# --- Demo 2: scrape the comment count from a Daum news article ---
chrome_driver = './chromedriver.exe'
driver = webdriver.Chrome(chrome_driver)
try:
    url = 'https://news.v.daum.net/v/20190728165812603'
    driver.get(url)
    # The comment count is injected by JavaScript; wait for the element
    # explicitly instead of sleeping a fixed 2 seconds (same pattern the
    # Naver section below uses).
    WebDriverWait(driver, 10).until(
        EC.presence_of_element_located((By.CSS_SELECTOR, 'span.alex-count-area'))
    )
    src = driver.page_source
finally:
    # quit() (unlike close()) also terminates the chromedriver process,
    # even if the page load or the wait above raised.
    driver.quit()
# Explicit parser avoids bs4's "no parser specified" warning and makes the
# result independent of which parsers happen to be installed.
soup = BeautifulSoup(src, 'html.parser')
comment_area = soup.select_one('span.alex-count-area')
# select_one returns None when the element is missing — guard before use,
# and actually print the text (the original computed and discarded it).
if comment_area is not None:
    print(comment_area.get_text())
# --- Demo 3: scrape the comment count from a Naver news article ---
url = 'https://n.news.naver.com/article/094/0000010049?cds=news_media_pc&type=editn'
chrome_driver = './chromedriver.exe'
driver = webdriver.Chrome(chrome_driver)
try:
    driver.get(url)
    # Wait until the comment-count element has loaded (it is rendered by JS).
    # The element handle itself is not needed — only the rendered page source.
    WebDriverWait(driver, 10).until(
        EC.presence_of_element_located((By.CSS_SELECTOR, '.u_cbox_count'))
    )
    src = driver.page_source
finally:
    # quit() also terminates the chromedriver process; close() does not.
    driver.quit()
# Explicit parser avoids bs4's "no parser specified" warning.
soup = BeautifulSoup(src, 'html.parser')
comment_area = soup.select_one('.u_cbox_count')
# Guard: select_one returns None when the selector matches nothing.
if comment_area is not None:
    print(comment_area.get_text())